Move count_info outside the pfn_info union, to where it belongs: it must be valid in both the in-use and free states of a page, and it must sit immediately before the owning-domain pointer so that the (count_info, domain) pair can be read and updated as a single atomic 64-bit quantity via cmpxchg8b.
{
page = list_entry(ent, struct pfn_info, list);
- if ( test_and_clear_bit(_PGC_allocated, &page->u.inuse.count_info) )
+ if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
put_page(page);
}
{
page = list_entry(ent, struct pfn_info, list);
- if ( test_and_clear_bit(_PGC_guest_pinned, &page->u.inuse.count_info) )
+ if ( test_and_clear_bit(_PGC_guest_pinned, &page->count_info) )
put_page_and_type(page);
- if ( test_and_clear_bit(_PGC_allocated, &page->u.inuse.count_info) )
+ if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
put_page(page);
/*
mfn++ )
{
page = &frame_table[mfn];
- page->u.inuse.domain = p;
- page->u.inuse.type_info = 0;
- page->u.inuse.count_info = PGC_always_set | PGC_allocated | 1;
+ page->u.inuse.domain = p;
+ page->u.inuse.type_info = 0;
+ page->count_info = PGC_always_set | PGC_allocated | 1;
list_add_tail(&page->list, &p->page_list);
p->tot_pages++; p->max_pages++;
}
*l1tab++ = mk_l1_pgentry((mfn << PAGE_SHIFT) | L1_PROT);
page = &frame_table[mfn];
- set_bit(_PGC_tlb_flush_on_type_change, &page->u.inuse.count_info);
+ set_bit(_PGC_tlb_flush_on_type_change, &page->count_info);
if ( !get_page_and_type(page, p, PGT_writable_page) )
BUG();
/* Get another ref to L2 page so that it can be pinned. */
if ( !get_page_and_type(page, p, PGT_l2_page_table) )
BUG();
- set_bit(_PGC_guest_pinned, &page->u.inuse.count_info);
+ set_bit(_PGC_guest_pinned, &page->count_info);
}
else
{
static void ptwr_disable(void);
unsigned long mfn;
+ /*
+ * We are rather picky about the layout of 'struct pfn_info'. The
+ * count_info and domain fields must be adjacent, as we perform atomic
+ * 64-bit operations on them. Also, just for sanity, we assert the size
+ * of the structure here.
+ */
+ if ( (offsetof(struct pfn_info, u.inuse.domain) !=
+ (offsetof(struct pfn_info, count_info) + sizeof(u32))) ||
+ (sizeof(struct pfn_info) != 24) )
+ {
+ printk("Weird pfn_info layout (%ld,%ld,%d)\n",
+ offsetof(struct pfn_info, count_info),
+ offsetof(struct pfn_info, u.inuse.domain),
+ sizeof(struct pfn_info));
+ for ( ; ; ) ;
+ }
+
memset(percpu_info, 0, sizeof(percpu_info));
vm_assist_info[VMASST_TYPE_writable_pagetables].enable =
ptwr_disable;
for ( mfn = 0; mfn < max_page; mfn++ )
- frame_table[mfn].u.inuse.count_info |= PGC_always_set;
+ frame_table[mfn].count_info |= PGC_always_set;
/* Initialise to a magic of 0x55555555 so easier to spot bugs later. */
memset(machine_to_phys_mapping, 0x55, 4<<20);
mfn < virt_to_phys(&machine_to_phys_mapping[1<<20])>>PAGE_SHIFT;
mfn++ )
{
- frame_table[mfn].u.inuse.count_info |= PGC_allocated | 1;
- frame_table[mfn].u.inuse.type_info = PGT_gdt_page | 1; /* non-RW */
- frame_table[mfn].u.inuse.domain = dom_xen;
+ frame_table[mfn].count_info |= PGC_allocated | 1;
+ frame_table[mfn].u.inuse.type_info = PGT_gdt_page | 1; /* non-RW */
+ frame_table[mfn].u.inuse.domain = dom_xen;
}
}
* No need for LOCK prefix -- we know that count_info is never zero
* because it contains PGC_always_set.
*/
+ ASSERT(test_bit(_PGC_always_set, &page->count_info));
__asm__ __volatile__(
"cmpxchg8b %2"
- : "=a" (e), "=d" (count_info),
- "=m" (*(volatile u64 *)(&page->u.inuse.domain))
- : "0" (0), "1" (0), "b" (0), "c" (0) );
+ : "=d" (e), "=a" (count_info),
+ "=m" (*(volatile u64 *)(&page->count_info))
+ : "0" (0), "1" (0), "c" (0), "b" (0) );
if ( unlikely((count_info & PGC_count_mask) == 0) ||
unlikely(e == NULL) || unlikely(!get_domain(e)) )
return 0;
{
if ( unlikely(!get_page_type(page, PGT_writable_page)) )
return 0;
- set_bit(_PGC_tlb_flush_on_type_change, &page->u.inuse.count_info);
+ set_bit(_PGC_tlb_flush_on_type_change, &page->count_info);
}
return 1;
int alloc_page_type(struct pfn_info *page, unsigned int type)
{
if ( unlikely(test_and_clear_bit(_PGC_tlb_flush_on_type_change,
- &page->u.inuse.count_info)) )
+ &page->count_info)) )
{
struct domain *p = page->u.inuse.domain;
if ( unlikely(NEED_FLUSH(tlbflush_time[p->processor],
break;
}
- if ( unlikely(test_and_set_bit(_PGC_guest_pinned,
- &page->u.inuse.count_info)) )
+ if ( unlikely(test_and_set_bit(_PGC_guest_pinned,
+ &page->count_info)) )
{
MEM_LOG("Pfn %08lx already pinned", pfn);
put_page_and_type(page);
ptr, page->u.inuse.domain);
}
else if ( likely(test_and_clear_bit(_PGC_guest_pinned,
- &page->u.inuse.count_info)) )
+ &page->count_info)) )
{
put_page_and_type(page);
put_page(page);
* disappears then the deallocation routine will safely spin.
*/
nd = page->u.inuse.domain;
- y = page->u.inuse.count_info;
+ y = page->count_info;
do {
x = y;
if ( unlikely((x & (PGC_count_mask|PGC_allocated)) !=
}
__asm__ __volatile__(
LOCK_PREFIX "cmpxchg8b %3"
- : "=a" (nd), "=d" (y), "=b" (e),
- "=m" (*(volatile u64 *)(&page->u.inuse.domain))
- : "0" (d), "1" (x), "b" (e), "c" (x) );
+ : "=d" (nd), "=a" (y), "=c" (e),
+ "=m" (*(volatile u64 *)(&page->count_info))
+ : "0" (d), "1" (x), "c" (e), "b" (x) );
}
while ( unlikely(nd != d) || unlikely(y != x) );
l1_pgentry_val(linear_pg_table[(unsigned long)pl2e >>
PAGE_SHIFT]) >> PAGE_SHIFT,
frame_table[pfn].u.inuse.type_info,
- frame_table[pfn].u.inuse.count_info,
+ frame_table[pfn].count_info,
frame_table[pfn].u.inuse.domain->domain));
nl2e = mk_l2_pgentry(l2_pgentry_val(*pl2e) | _PAGE_PRESENT);
PTWR_PRINTK(("[A] now pl2e %p l2e %08lx taf %08x/%08x/%u\n",
pl2e, l2_pgentry_val(*pl2e),
frame_table[pfn].u.inuse.type_info,
- frame_table[pfn].u.inuse.count_info,
+ frame_table[pfn].count_info,
frame_table[pfn].u.inuse.domain->domain));
ptwr_info[cpu].disconnected = ENTRIES_PER_L2_PAGETABLE;
/* make pt page write protected */
l1_pgentry_t *pl1e;
PTWR_PRINTK(("[I] freeing l1 page %p taf %08x/%08x\n", page,
page->u.inuse.type_info,
- page->u.inuse.count_info));
+ page->count_info));
if (ptwr_info[cpu].writable_idx == PTWR_NR_WRITABLES)
ptwr_flush_inactive();
ptwr_info[cpu].writables[ptwr_info[cpu].writable_idx] = addr;
>> PAGE_SHIFT]) >>
PAGE_SHIFT,
frame_table[pfn].u.inuse.type_info,
- frame_table[pfn].u.inuse.count_info,
+ frame_table[pfn].count_info,
frame_table[pfn].u.inuse.domain->domain));
/* disconnect l1 page */
nl2e = mk_l2_pgentry((l2_pgentry_val(*pl2e) & ~_PAGE_PRESENT));
PTWR_PRINTK(("[A] now pl2e %p l2e %08lx "
"taf %08x/%08x/%u\n", pl2e, l2_pgentry_val(*pl2e),
frame_table[pfn].u.inuse.type_info,
- frame_table[pfn].u.inuse.count_info,
+ frame_table[pfn].count_info,
frame_table[pfn].u.inuse.domain->domain));
ptwr_info[cpu].writable_l1 = addr;
pl1e = map_domain_mem(l2_pgentry_to_pagenr(nl2e) <<
break;
}
- if ( page->u.inuse.count_info & PGC_guest_pinned )
+ if ( page->count_info & PGC_guest_pinned )
type |= LPINTAB;
l_arr[j] |= type;
put_page(page);
return i;
}
- if ( test_and_clear_bit(_PGC_guest_pinned,
- &page->u.inuse.count_info) )
+ if ( test_and_clear_bit(_PGC_guest_pinned, &page->count_info) )
put_page_and_type(page);
- if ( test_and_clear_bit(_PGC_allocated,
- &page->u.inuse.count_info) )
+ if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
put_page(page);
put_page(page);
{
page = list_entry(ent, struct pfn_info, list);
printk("Page %08x: caf=%08x, taf=%08x\n",
- page_to_phys(page), page->u.inuse.count_info,
+ page_to_phys(page), page->count_info,
page->u.inuse.type_info);
}
}
page = virt_to_page(d->shared_info);
printk("Shared_info@%08x: caf=%08x, taf=%08x\n",
- page_to_phys(page), page->u.inuse.count_info,
+ page_to_phys(page), page->count_info,
page->u.inuse.type_info);
printk("Guest: upcall_pend = %02x, upcall_mask = %02x\n",
for ( i = 0; i < (1 << order); i++ )
{
- pg[i].u.inuse.count_info = PGC_always_set;
- pg[i].u.inuse.domain = NULL;
- pg[i].u.inuse.type_info = 0;
+ pg[i].count_info = PGC_always_set;
+ pg[i].u.inuse.domain = NULL;
+ pg[i].u.inuse.type_info = 0;
}
return (unsigned long)page_to_virt(pg);
}
}
- pg[i].u.inuse.count_info = PGC_always_set;
- pg[i].u.inuse.domain = NULL;
- pg[i].u.inuse.type_info = 0;
+ pg[i].count_info = PGC_always_set;
+ pg[i].u.inuse.domain = NULL;
+ pg[i].u.inuse.type_info = 0;
}
if ( d == NULL )
{
pg[i].u.inuse.domain = d;
wmb(); /* Domain pointer must be visible before updating refcnt. */
- pg[i].u.inuse.count_info |= PGC_allocated | 1;
+ pg[i].count_info |= PGC_allocated | 1;
list_add_tail(&pg[i].list, &d->page_list);
}
/* Each frame can be threaded onto a doubly-linked list. */
struct list_head list;
+ /* Reference count and various PGC_xxx flags and fields. */
+ u32 count_info;
+
/* Context-dependent fields follow... */
union {
- /* Page is in use by a domain. */
+ /* Page is in use: ((count_info & PGC_count_mask) != 0). */
struct {
- /* Owner of this page. */
+ /* Owner of this page (NULL if page is anonymous). */
struct domain *domain;
- /* Reference count and various PGC_xxx flags and fields. */
- u32 count_info;
/* Type reference count and various PGT_xxx flags and fields. */
u32 type_info;
} inuse;
- /* Page is on a free list. */
+ /* Page is on a free list: ((count_info & PGC_count_mask) == 0). */
struct {
/* Mask of possibly-tainted TLBs. */
unsigned long cpu_mask;
- /* Must be at same offset as 'u.inuse.count_flags'. */
- u32 __unavailable;
/* Order-size of the free chunk this page is the head of. */
u8 order;
} free;
wmb(); /* install valid domain ptr before updating refcnt. */ \
spin_lock(&(_dom)->page_alloc_lock); \
/* _dom holds an allocation reference */ \
- ASSERT((_pfn)->u.inuse.count_info == PGC_always_set); \
- (_pfn)->u.inuse.count_info |= PGC_allocated | 1; \
+ ASSERT((_pfn)->count_info == PGC_always_set); \
+ (_pfn)->count_info |= PGC_allocated | 1; \
if ( unlikely((_dom)->xenheap_pages++ == 0) ) \
get_knownalive_domain(_dom); \
list_add_tail(&(_pfn)->list, &(_dom)->xenpage_list); \
static inline void put_page(struct pfn_info *page)
{
- u32 nx, x, y = page->u.inuse.count_info;
+ u32 nx, x, y = page->count_info;
do {
x = y;
nx = x - 1;
}
- while ( unlikely((y = cmpxchg(&page->u.inuse.count_info, x, nx)) != x) );
+ while ( unlikely((y = cmpxchg(&page->count_info, x, nx)) != x) );
if ( unlikely((nx & PGC_count_mask) == 0) )
free_domheap_page(page);
static inline int get_page(struct pfn_info *page,
struct domain *domain)
{
- u32 x, nx, y = page->u.inuse.count_info;
+ u32 x, nx, y = page->count_info;
struct domain *p, *np = page->u.inuse.domain;
do {
}
__asm__ __volatile__(
LOCK_PREFIX "cmpxchg8b %3"
- : "=a" (np), "=d" (y), "=b" (p),
- "=m" (*(volatile u64 *)(&page->u.inuse.domain))
- : "0" (p), "1" (x), "b" (p), "c" (nx) );
+ : "=d" (np), "=a" (y), "=c" (p),
+ "=m" (*(volatile u64 *)(&page->count_info))
+ : "0" (p), "1" (x), "c" (p), "b" (nx) );
}
while ( unlikely(np != p) || unlikely(y != x) );
DPRINTK("Error while validating pfn %08lx for type %08x."
" caf=%08x taf=%08x\n",
page_to_pfn(page), type,
- page->u.inuse.count_info,
+ page->count_info,
page->u.inuse.type_info);
put_page_type(page);
return 0;
ASSERT(((_p)->u.inuse.type_info & PGT_type_mask) == (_t)); \
ASSERT(((_p)->u.inuse.type_info & PGT_count_mask) != 0)
#define ASSERT_PAGE_IS_DOMAIN(_p, _d) \
- ASSERT(((_p)->u.inuse.count_info & PGC_count_mask) != 0); \
+ ASSERT(((_p)->count_info & PGC_count_mask) != 0); \
ASSERT((_p)->u.inuse.domain == (_d))
int check_descriptor(unsigned long *d);
mfn, pfn, m->shadow_dirty_bitmap_size, m );
SH_LOG("dom=%u caf=%08x taf=%08x\n",
frame_table[mfn].u.inuse.domain->domain,
- frame_table[mfn].u.inuse.count_info,
+ frame_table[mfn].count_info,
frame_table[mfn].u.inuse.type_info );
}